In this report, we reproduce the analyses of fMRI Study 1 reported in the Supplementary Material.

prep data

First, we load the relevant packages, define functions and plotting aesthetics, and load and tidy the data.

load packages

# Load (and auto-install if missing) all CRAN packages used in this report.
library(pacman)
pacman::p_load(tidyverse, purrr, fs, knitr, lmerTest, ggeffects, kableExtra, boot, devtools, install = TRUE)
# Install the GitHub-only `emo` package once, rather than unconditionally on
# every render (the original re-ran install_github each time, requiring
# network access and slowing knitting).
if (!requireNamespace("emo", quietly = TRUE)) {
  devtools::install_github("hadley/emo")
}

define functions

# geom_flat_violin() for raincloud-style plots, sourced from a gist pinned to
# a specific commit hash so the definition is stable across renders.
source("https://gist.githubusercontent.com/benmarwick/2a1bb0133ff568cbe28d/raw/fb53bd97121f7f9ce947837ef1a4c65a73bffb3f/geom_flat_violin.R")

# MLM results table function
# Format the fixed effects of a fitted mixed-effects model as a
# publication-ready table.
#
# model_data: a fitted model supported by broom.mixed::tidy() (here, lmer fits).
# Returns a data frame with columns: term, `b [95% CI]`, df, t, p.
table_model <- function(model_data) {
  model_data %>%
    broom.mixed::tidy(conf.int = TRUE) %>%
    filter(effect == "fixed") %>%
    rename("SE" = std.error,
           "t" = statistic,
           "p" = p.value) %>%
    select(-group, -effect) %>%
    # Round every numeric column except the term labels and the p value.
    # across() replaces the superseded mutate_at()/vars() idiom.
    mutate(across(!contains("term") & !contains("p"), ~ round(.x, 2))) %>%
    # Human-readable term labels. (The condother/condself substitutions are
    # no-ops after "cond" is stripped by the first gsub; kept for parity with
    # the original ordering.)
    mutate(term = gsub("cond", "", term),
           term = gsub("\\(Intercept\\)", "intercept", term),
           term = gsub("condother", "other", term),
           term = gsub("condself", "self", term),
           term = gsub("topichealth", "topic (health)", term),
           term = gsub("self_referential", "self-referential", term),
           term = gsub("self_relevance", "self-relevance", term),
           term = gsub("social_relevance", "social relevance", term),
           term = gsub(":", " x ", term),
           # APA-style p values: "< .001", otherwise three decimals with no
           # leading zero. Anchoring with sub("^0\\.", ...) leaves "1.000"
           # intact, replacing the original's fragile gsub("0.(.*)") plus an
           # exact float comparison (p == 1) to patch its misformatting.
           p = ifelse(p < .001, "< .001", sub("^0\\.", ".", sprintf("%.3f", p))),
           `b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", estimate, conf.low, conf.high)) %>%
    select(term, `b [95% CI]`, df, t, p)
}

# Report simple slopes of `var` at levels of `moderator` as a styled kable.
#
# model:      a fitted model accepted by emmeans
# var:        name of the focal predictor (string)
# moderator:  name of the moderating variable (string)
# continuous: TRUE -> per-level trends via emtrends();
#             FALSE -> revpairwise contrasts vs. "control" via emmeans()
simple_slopes <- function(model, var, moderator, continuous = TRUE) {

  if (isTRUE(continuous)) {
    # Slope of `var` within each level of the moderator
    trend_formula <- as.formula(paste("~", moderator))
    out <- emmeans::emtrends(model, trend_formula, var = var) %>%
      data.frame() %>%
      rename("trend" = 2) %>%
      mutate(`b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", trend, asymp.LCL, asymp.UCL)) %>%
      select(!!moderator, `b [95% CI]`)
  } else {
    # Condition contrasts against "control" within each moderator level,
    # unadjusted for multiple comparisons
    cell_means <- emmeans::emmeans(model, as.formula(paste("~", var, "|", moderator)))
    out <- confint(emmeans::contrast(cell_means, "revpairwise", by = moderator, adjust = "none")) %>%
      data.frame() %>%
      filter(grepl("control", contrast)) %>%
      mutate(`b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", estimate, asymp.LCL, asymp.UCL)) %>%
      select(contrast, !!moderator, `b [95% CI]`) %>%
      arrange(contrast)
  }

  out %>%
    kable() %>%
    kableExtra::kable_styling()
}

define aesthetics

# Named color palettes used throughout the report; names must match the
# factor levels / legend labels used in the plots.
palette_condition <- setNames(c("#ee9b00", "#bb3e03", "#005f73"),
                              c("self", "control", "other"))

# Unnamed palette: colors are assigned in level order.
palette_sharing <- c("#0a9396", "#ee9b00")

palette_roi <- setNames(c("#ee9b00", "#005f73"),
                        c("self-referential", "mentalizing"))

palette_dv <- setNames(c("#ee9b00", "#005f73", "#56282D"),
                       c("self-relevance", "social relevance", "sharing"))

palette_topic <- setNames(c("#E6805E", "#3A3357"),
                          c("climate", "health"))

# Shared ggplot theme: minimal base, legend on top, no grid lines, black axis
# text/lines, no y tick marks.
# NOTE(review): "Futura Medium" must be available to the graphics device;
# otherwise rendering falls back to the default font (with warnings) — confirm
# on the machine that knits this report.
plot_aes = theme_minimal() +
  theme(legend.position = "top",
        legend.text = element_text(size = 12),
        text = element_text(size = 16, family = "Futura Medium"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.text = element_text(color = "black"),
        axis.line = element_line(colour = "black"),
        axis.ticks.y = element_blank())

load and tidy data

# Load the trial-level study 1 data (path relative to this report's folder).
merged_all = read.csv("../data/study1_data.csv")

# Drop flagged outlier trials (keeping trials with no outlier flag), then scale
# each parameter estimate by its participant x atlas SD. Note: this divides by
# the SD without mean-centering, and the group_by() grouping is never
# ungroup()ed, so `merged` stays grouped for all downstream code.
merged = merged_all %>%
  filter(outlier == "no" | is.na(outlier)) %>%
  group_by(pID, atlas) %>%
  mutate(parameter_estimate_std = parameter_estimate / sd(parameter_estimate, na.rm = TRUE)) 

# One row per trial with the preregistered Neurosynth ROIs as columns.
# (spread() is superseded; pivot_wider() is the modern equivalent.)
merged_wide = merged %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(site, pID, trial, topic, cond, value, self_relevance, social_relevance, atlas, parameter_estimate_std) %>%
  spread(atlas, parameter_estimate_std) %>%
  rename("self_referential" = `self-referential`)

# Same layout for the alternative (Scholz et al., 2017) ROIs; "_nopc" marks the
# mentalizing ROI with the PCC/precuneus cluster removed (see text above).
merged_wide_alt = merged %>%
  filter(atlas %in% c("pnas_self", "pnas_mentalizing_nopc")) %>%
  select(site, pID, trial, topic, cond, value, self_relevance, social_relevance, atlas, parameter_estimate_std) %>%
  spread(atlas, parameter_estimate_std) %>%
  rename("self_referential" = pnas_self,
         "mentalizing" = pnas_mentalizing_nopc) 



sensitivity analyses

Given the high correlation between the preregistered Neurosynth ROIs, we conducted sensitivity analyses using ROIs from Scholz et al. (2017), "A neural model of valuation and information virality."

In order to maximize the differentiation between the self-referential and mentalizing ROIs, we removed the PCC/precuneus cluster from the mentalizing ROI as it overlapped with the self-referential ROI.

ROI correlations

Compared to the preregistered Neurosynth ROIs (r = .94, 95% CI [.93, .94]), the correlation between the alternative ROIs is substantially reduced.

# Repeated-measures (within-participant) correlation between the two
# alternative ROIs; `data = .` receives the piped data frame, so the pipe does
# not also insert it as the first argument.
merged_wide_alt %>%
  rmcorr::rmcorr(as.factor(pID), mentalizing, self_referential, data = .)
## 
## Repeated measures correlation
## 
## r
## 0.5635195
## 
## degrees of freedom
## 5862
## 
## p-value
## 0
## 
## 95% confidence interval
## 0.5457931 0.5807417

H1

Is greater activity in the ROIs associated with higher self and social relevance ratings?

self-referential ROI

✅ H1a: Greater activity in the self-referential ROI will be associated with higher self-relevance ratings

# H1a: predict trial-level self-relevance ratings from standardized
# self-referential ROI activity; random intercepts and slopes by participant.
# bobyqa optimizer aids convergence.
mod_h1a =  lmer(self_relevance ~ self_referential + (1 + self_referential | pID),
               data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for H1a, rendered with kable.
table_h1a = table_model(mod_h1a)

table_h1a %>%
    kable()  %>%
    kableExtra::kable_styling()
term b [95% CI] df t p
intercept 2.56 [2.48, 2.64] 83.53 64.50 < .001
self-referential 0.03 [0.01, 0.06] 82.65 2.58 .012

summary

# Full lmer output for H1a (random-effect variances and fixed effects).
summary(mod_h1a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ self_referential + (1 + self_referential | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16547
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4727 -0.7077  0.1407  0.6782  2.4480 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.11782  0.34325       
##           self_referential 0.00147  0.03834  -0.82
##  Residual                  0.91422  0.95615       
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##                  Estimate Std. Error       df t value            Pr(>|t|)    
## (Intercept)       2.55837    0.03967 83.53210  64.497 <0.0000000000000002 ***
## self_referential  0.03364    0.01302 82.65488   2.583              0.0115 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## self_rfrntl -0.343

mentalizing ROI

✅ H1b: Greater activity in the mentalizing ROI will be associated with higher social relevance ratings

# H1b: predict trial-level social relevance ratings from standardized
# mentalizing ROI activity; random intercepts and slopes by participant.
mod_h1b = lmer(social_relevance ~ mentalizing + (1 + mentalizing | pID),
               data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for H1b, rendered with kable.
table_h1b = table_model(mod_h1b)

table_h1b %>%
    kable()  %>%
    kableExtra::kable_styling()
term b [95% CI] df t p
intercept 2.66 [2.58, 2.75] 83.07 63.50 < .001
mentalizing 0.04 [0.01, 0.06] 81.67 3.18 .002

summary

# Full lmer output for H1b.
summary(mod_h1b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ mentalizing + (1 + mentalizing | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15633.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8133 -0.7183  0.1706  0.6483  2.6824 
## 
## Random effects:
##  Groups   Name        Variance   Std.Dev. Corr
##  pID      (Intercept) 0.13591052 0.368660     
##           mentalizing 0.00004934 0.007024 0.09
##  Residual             0.78089689 0.883684     
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##             Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)  2.66358    0.04194 83.06997  63.502 < 0.0000000000000002 ***
## mentalizing  0.03653    0.01147 81.66916   3.184              0.00205 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## mentalizing -0.069

combined plot

# Model-implied predictions across ROI activity from -4.5 to 5 SD, stacked for
# both ROIs with labels matching the plot facets/palettes.
predicted = ggeffects::ggpredict(mod_h1a, c("self_referential [-4.5:5]")) %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h1b, c("mentalizing [-4.5:5]")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing",
                     variable = "social relevance"))

# Trial-level data underlying the per-participant regression lines in plot_h1.
# NOTE(review): the fixed-effect predictions above come from models fit to
# merged_wide_alt (the alternative Scholz et al. ROIs), so the overlaid raw
# data are drawn from the same data frame here. The original used merged_wide
# (the Neurosynth ROIs), which would mix the two ROI sets in one figure —
# confirm this was not intentional.
ind_data = merged_wide_alt %>%
  select(pID, trial, contains("relevance"), mentalizing, self_referential) %>%
  rename("self-referential" = self_referential) %>%
  # long format: one row per trial x rating type x ROI
  gather(variable, predicted, contains("relevance")) %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable)) %>%
  gather(roi, x, mentalizing, `self-referential`) %>%
  # keep only the hypothesized ROI-rating pairings
  filter(!(variable == "self-relevance" & roi == "mentalizing") & !(variable == "social relevance" & roi == "self-referential"))

# Combined H1 plot: faint per-participant OLS lines, the mixed-model
# prediction, and its 95% CI ribbon, faceted by rating type.
(plot_h1 = predicted %>%
  ggplot(aes(x, predicted)) +
  stat_smooth(data = ind_data, aes(group = pID, color = roi), geom = 'line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = roi), alpha = .3, color = NA) +
  geom_line(aes(color = roi), size = 2) +
  facet_grid(~variable) +
  # guide = "none" replaces the deprecated guide = FALSE (ggplot2 >= 3.3.4),
  # matching the usage elsewhere in this report (e.g. plot_h4)
  scale_color_manual(name = "", values = palette_roi, guide = "none") +
  scale_fill_manual(name = "", values = palette_roi, guide = "none") +
  labs(x = "\nROI activity (SD)", y = "predicted rating\n") +
  plot_aes  +
  theme(legend.position = "top",
        legend.key.width = unit(2, "cm")))

H4

Do the manipulations increase neural activity in brain regions associated with self-referential processing and mentalizing?

self-referential ROI

✅ H4a: Self-focused intervention (compared to control) will increase brain activity in ROIs related to self-referential processes.

# H4a: effect of condition (control = reference) on standardized activity in
# the self-referential ROI; random intercepts and condition slopes by pID.
mod_h4a = lmer(self_referential ~ cond + (1 + cond | pID),
              data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for H4a, rendered with kable.
table_h4a = table_model(mod_h4a)

table_h4a %>%
    kable()  %>%
    kableExtra::kable_styling()
term b [95% CI] df t p
intercept 0.22 [0.12, 0.32] 83.13 4.22 < .001
other 0.10 [0.03, 0.18] 83.46 2.71 .008
self 0.13 [0.05, 0.22] 82.62 3.01 .003

summary

# Full lmer output for H4a.
summary(mod_h4a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_referential ~ cond + (1 + cond | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17064
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4757 -0.6562 -0.0143  0.6475  3.5902 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.18595  0.4312              
##           condother   0.04294  0.2072   -0.11      
##           condself    0.08488  0.2913    0.08  0.60
##  Residual             0.97663  0.9882              
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##             Estimate Std. Error       df t value  Pr(>|t|)    
## (Intercept)  0.21973    0.05202 83.12970   4.224 0.0000612 ***
## condother    0.10475    0.03869 83.46043   2.707   0.00823 ** 
## condself     0.13441    0.04468 82.62076   3.008   0.00348 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.304       
## condself  -0.160  0.536

mentalizing ROI

❌ H4b: Other-focused intervention (compared to control) will increase brain activity in ROIs related to mentalizing processes.

# H4b: effect of condition (control = reference) on standardized activity in
# the mentalizing ROI; random intercepts and condition slopes by pID.
mod_h4b = lmer(mentalizing ~ cond + (1 + cond | pID),
              data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for H4b, rendered with kable.
table_h4b = table_model(mod_h4b)

table_h4b %>%
    kable()  %>%
    kableExtra::kable_styling()
term b [95% CI] df t p
intercept 0.25 [0.14, 0.36] 83.17 4.36 < .001
other 0.01 [-0.06, 0.09] 82.26 0.34 .736
self 0.06 [-0.03, 0.14] 82.99 1.37 .176

summary

# Full lmer output for H4b.
summary(mod_h4b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: mentalizing ~ cond + (1 + cond | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17093.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4722 -0.6562 -0.0008  0.6673  4.0456 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.23402  0.4838              
##           condother   0.04378  0.2092   -0.45      
##           condself    0.07291  0.2700   -0.20  0.87
##  Residual             0.98465  0.9923              
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##             Estimate Std. Error       df t value  Pr(>|t|)    
## (Intercept)  0.24960    0.05730 83.16805   4.356 0.0000375 ***
## condother    0.01314    0.03893 82.25912   0.338     0.736    
## condself     0.05895    0.04315 82.98614   1.366     0.176    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.463       
## condself  -0.329  0.645

combined plot

# Model-implied ROI activity by condition for both ROIs, with factor levels
# ordered for plotting (self, control, other; self-referential first).
predicted_h4 = ggeffects::ggpredict(mod_h4a, c("cond")) %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h4b, c("cond")) %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Trial-level ROI activity for the grey per-participant mean lines.
# NOTE(review): merged also contains atlases literally named
# "self-referential"/"mentalizing" (the Neurosynth set); after recoding the
# pnas_* labels to the same names, the filter retains rows from BOTH ROI sets.
# Confirm whether only the alternative ROIs were intended here.
ind_data_h4 = merged %>%
  mutate(atlas = recode(atlas, "pnas_self" = "self-referential",
                        "pnas_mentalizing_nopc" = "mentalizing")) %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(pID, cond, run, trial, atlas, parameter_estimate_std) %>%
  unique() %>%
  rename("x" = cond,
         "predicted" = parameter_estimate_std) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# H4 plot: grey per-participant mean lines, the overall mean line, and
# model-implied point ranges per condition, faceted by ROI.
# NOTE(review): no alpha aesthetic is mapped, so scale_alpha_manual() appears
# to be a no-op; legend.position is also set although the color guide is
# disabled — both look like leftovers from an earlier version.
(plot_h4 = predicted_h4 %>%
  ggplot(aes(x = x, y = predicted)) +
  stat_summary(data = ind_data_h4, aes(group = pID), fun = "mean", geom = "line",
               size = .1, color = "grey50") +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1) +
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high), size = .75) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_alpha_manual(name = "", values = c(1, .5)) +
  labs(x = "", y = "ROI activity (SD)\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H6

Is ROI activity positively related to sharing intentions?

self-referential ROI

✅ Stronger activity in the self-referential ROI will be related to higher sharing intentions.

# H6a: predict sharing intention (value) from standardized self-referential
# ROI activity; random intercepts and slopes by participant.
mod_h6a = lmer(value ~ self_referential + (1 + self_referential | pID),
              data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for H6a, rendered with kable.
table_h6a = table_model(mod_h6a)

table_h6a %>%
    kable()  %>%
    kableExtra::kable_styling()
term b [95% CI] df t p
intercept 2.60 [2.52, 2.68] 83.97 67.18 < .001
self-referential 0.06 [0.04, 0.09] 82.55 4.45 < .001

summary

# Full lmer output for H6a.
summary(mod_h6a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_referential + (1 + self_referential | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16449.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5541 -0.7224  0.1118  0.7366  2.1967 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.110384 0.33224       
##           self_referential 0.004085 0.06391  -0.34
##  Residual                  0.931278 0.96503       
## Number of obs: 5868, groups:  pID, 84
## 
## Fixed effects:
##                  Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)       2.59876    0.03868 83.96543  67.183 < 0.0000000000000002 ***
## self_referential  0.06404    0.01438 82.55053   4.453            0.0000263 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## self_rfrntl -0.244

mentalizing ROI

✅ Stronger activation in the mentalizing ROI will be related to higher sharing intentions.

# H6b: predict sharing intention (value) from standardized mentalizing ROI
# activity; random intercepts and slopes by participant.
mod_h6b = lmer(value ~ mentalizing + (1 + mentalizing | pID),
              data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for H6b, rendered with kable.
table_h6b = table_model(mod_h6b)

table_h6b %>%
    kable()  %>%
    kableExtra::kable_styling()
term b [95% CI] df t p
intercept 2.61 [2.53, 2.68] 83.61 68.01 < .001
mentalizing 0.05 [0.02, 0.07] 82.17 3.64 < .001

summary

# Full lmer output for H6b.
summary(mod_h6b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ mentalizing + (1 + mentalizing | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16465.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.6303 -0.7152  0.1086  0.7365  2.1138 
## 
## Random effects:
##  Groups   Name        Variance  Std.Dev. Corr
##  pID      (Intercept) 0.1087912 0.32984      
##           mentalizing 0.0006895 0.02626  0.18
##  Residual             0.9362515 0.96760      
## Number of obs: 5868, groups:  pID, 84
## 
## Fixed effects:
##             Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)  2.60635    0.03832 83.60743  68.010 < 0.0000000000000002 ***
## mentalizing  0.04707    0.01292 82.17138   3.644             0.000468 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## mentalizing -0.051

combined plot

# Grid of ROI activity values for predictions; referenced by name inside
# ggpredict's "[vals]" term syntax below.
vals = seq(-4.5,4.5,.1)

# Model-implied sharing intention across ROI activity for both ROIs.
predicted_h6 = ggeffects::ggpredict(mod_h6a, c("self_referential [vals]")) %>%
  data.frame() %>%
  mutate(roi = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h6b, c("mentalizing [vals]")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing")) %>%
  mutate(roi = factor(roi, levels = c("self-referential", "mentalizing")))

# Trial-level data for the per-participant lines.
# NOTE(review): unlike ind_data_h4, atlas is neither recoded nor filtered
# here, so rows from every other atlas get roi = NA after factor() and will
# surface as an NA facet in the plot below — confirm whether a filter to the
# two plotted ROIs (and/or the pnas_* recode) is missing.
ind_data_h6 = merged %>%
  select(pID, cond, run, trial, atlas, parameter_estimate_std, value) %>%
  rename("x" = parameter_estimate_std,
         "predicted" = value,
         "roi" = atlas) %>%
  mutate(roi = factor(roi, levels = c("self-referential", "mentalizing")))

# Combined H6 plot: faint per-participant OLS lines, the mixed-model
# prediction line, and its 95% CI ribbon, faceted by ROI.
predicted_h6 %>%
  ggplot(aes(x = x, y = predicted, color = roi, fill = roi)) +
  stat_smooth(data = ind_data_h6, aes(group = pID), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~roi) +
  scale_color_manual(name = "", values = palette_roi) +
  scale_fill_manual(name = "", values = palette_roi) +
  labs(y = "predicted sharing intention\n", x = "\nROI activity (SD)") +
  plot_aes +
  theme(legend.position = "none")

H7

Is there an indirect effect of the condition on sharing intentions through activity in self-referential and mentalizing ROIs?

prep data

# source functions
# Provides indirect.mlm() and indirect.mlm.summary() for bootstrapped
# multilevel mediation (used with boot() below).
source("indirectMLM.R")

# create self condition dataframe
# Self vs. control trials only; cond dummy-coded (self = 1, control = 0).
# The mediator is the raw (unstandardized) parameter estimate.
# NOTE(review): this filters the Neurosynth atlas label ("self-referential")
# although this supplement reports the alternative ROIs and the cached model
# files are suffixed "_alternative" — confirm the intended atlas.
data_med_self = merged %>%
  filter(!cond == "other" & atlas == "self-referential") %>%
  mutate(cond = ifelse(cond == "self", 1, 0)) %>%
  select(pID, site, trial, cond, value, parameter_estimate) %>%
  data.frame()

# create social condition dataframe
# Other vs. control trials only; cond dummy-coded (other = 1, control = 0).
data_med_other = merged %>%
  filter(!cond == "self" & atlas == "mentalizing") %>%
  mutate(cond = ifelse(cond == "other", 1, 0)) %>%
  select(pID, site, trial, cond, value, parameter_estimate) %>%
  data.frame()

# define variables
# Outcome and mediator column names passed to boot()/indirect.mlm below.
y_var = "value"
m_var = "parameter_estimate"

self condition

✅ H7a: The effect of self-focused intervention on sharing intention will be mediated by increased activity in the self-referential ROI.

# Fit (or load a cached copy of) the bootstrapped multilevel mediation model
# for the self vs. control contrast, then print its summary.
model_name <- "mediation_self"
data <- data_med_self
model_path <- sprintf("models/model_%s_alternative.RDS", model_name)

if (file.exists(model_path)) {
  # assign(model_name, ...) replaces the original assign(get("model_name"), ...):
  # the extra get() only re-fetched the string already held in model_name.
  assign(model_name, readRDS(model_path))
} else {
  # R = 500 bootstrap resamples; within-person mediator and centered predictor
  assign(model_name, boot(data = data, statistic = indirect.mlm, R = 500,
                          y = y_var, x = "cond", mediator = m_var, group.id = "pID",
                          between.m = FALSE, uncentered.x = FALSE))
  # get() replaces the original eval(parse(text = ...)) anti-pattern
  saveRDS(get(model_name), model_path)
}

indirect.mlm.summary(get(model_name))
## #### Population Covariance ####
## Covariance of Random Slopes a and b: 0.001 [-0.002, 0.011]
## 
## 
## #### Indirect Effects ####
## # Within-subject Effects
## Unbiased Estimate of Within-subjects Indirect Effect: 0.007 [0.002, 0.02]
## Biased Estimate of Within-subjects Indirect Effect: 0.006 [0.001, 0.013]
## Bias in Within-subjects Indirect Effect: 0.001 [0, 0.01]
## 
## 
## #### Total Effect ####
## Unbiased Estimate of Total Effect: -0.047 [-0.108, 0.012]
## Biased Total Effect of X on Y (c path): -0.045 [-0.107, 0.017]
## Bias in Total Effect: 0.002 [0, 0.007]
## 
## 
## #### Direct Effects ####
## Direct Effect of Predictor on Dependent Variable (c' path): -0.054 [-0.115, 0.001]
## Within-subjects Effect of Predictor on Mediator (a path for group-mean centered predictor): 0.033 [0.004, 0.064]
## Within-subjects Effect of Mediator on Dependent Variable (b path for group-mean centered mediator): 0.172 [0.123, 0.255]

other condition

❌ H7b: The effect of other-focused intervention on sharing intention will be mediated by increased activity in the mentalizing ROI.

# Fit (or load a cached copy of) the bootstrapped multilevel mediation model
# for the other vs. control contrast, then print its summary.
model_name <- "mediation_other"
data <- data_med_other
model_path <- sprintf("models/model_%s_alternative.RDS", model_name)

if (file.exists(model_path)) {
  # assign(model_name, ...) replaces the original assign(get("model_name"), ...):
  # the extra get() only re-fetched the string already held in model_name.
  assign(model_name, readRDS(model_path))
} else {
  # R = 500 bootstrap resamples; within-person mediator and centered predictor
  assign(model_name, boot(data = data, statistic = indirect.mlm, R = 500,
                          y = y_var, x = "cond", mediator = m_var, group.id = "pID",
                          between.m = FALSE, uncentered.x = FALSE))
  # get() replaces the original eval(parse(text = ...)) anti-pattern
  saveRDS(get(model_name), model_path)
}

indirect.mlm.summary(get(model_name))
## #### Population Covariance ####
## Covariance of Random Slopes a and b: 0 [-0.005, 0.007]
## 
## 
## #### Indirect Effects ####
## # Within-subject Effects
## Unbiased Estimate of Within-subjects Indirect Effect: 0.003 [-0.003, 0.011]
## Biased Estimate of Within-subjects Indirect Effect: 0.003 [-0.002, 0.009]
## Bias in Within-subjects Indirect Effect: 0 [0, 0.007]
## 
## 
## #### Total Effect ####
## Unbiased Estimate of Total Effect: -0.03 [-0.092, 0.033]
## Biased Total Effect of X on Y (c path): -0.031 [-0.092, 0.032]
## Bias in Total Effect: 0.001 [0, 0.005]
## 
## 
## #### Direct Effects ####
## Direct Effect of Predictor on Dependent Variable (c' path): -0.033 [-0.098, 0.03]
## Within-subjects Effect of Predictor on Mediator (a path for group-mean centered predictor): 0.016 [-0.008, 0.037]
## Within-subjects Effect of Mediator on Dependent Variable (b path for group-mean centered mediator): 0.188 [0.135, 0.308]



combined table

# Stack all fixed-effect tables from the sensitivity analyses into one
# combined table, labeled by hypothesis and dependent variable.
table_h1a %>% mutate(DV = "H1a: Self-relevance") %>%
  bind_rows(table_h1b %>% mutate(DV = "H1b: Social relevance")) %>%
  bind_rows(table_h4a %>% mutate(DV = "H4a: Self-referential ROI")) %>%
  bind_rows(table_h4b %>% mutate(DV = "H4b: Mentalizing ROI")) %>%
  bind_rows(table_h6a %>% mutate(DV = "H6a: Sharing intention")) %>%
  bind_rows(table_h6b %>% mutate(DV = "H6b: Sharing intention")) %>%
  select(DV, everything()) %>%
  kable() %>%
  kable_styling()
DV term b [95% CI] df t p
H1a: Self-relevance intercept 2.56 [2.48, 2.64] 83.53 64.50 < .001
H1a: Self-relevance self-referential 0.03 [0.01, 0.06] 82.65 2.58 .012
H1b: Social relevance intercept 2.66 [2.58, 2.75] 83.07 63.50 < .001
H1b: Social relevance mentalizing 0.04 [0.01, 0.06] 81.67 3.18 .002
H4a: Self-referential ROI intercept 0.22 [0.12, 0.32] 83.13 4.22 < .001
H4a: Self-referential ROI other 0.10 [0.03, 0.18] 83.46 2.71 .008
H4a: Self-referential ROI self 0.13 [0.05, 0.22] 82.62 3.01 .003
H4b: Mentalizing ROI intercept 0.25 [0.14, 0.36] 83.17 4.36 < .001
H4b: Mentalizing ROI other 0.01 [-0.06, 0.09] 82.26 0.34 .736
H4b: Mentalizing ROI self 0.06 [-0.03, 0.14] 82.99 1.37 .176
H6a: Sharing intention intercept 2.60 [2.52, 2.68] 83.97 67.18 < .001
H6a: Sharing intention self-referential 0.06 [0.04, 0.09] 82.55 4.45 < .001
H6b: Sharing intention intercept 2.61 [2.53, 2.68] 83.61 68.01 < .001
H6b: Sharing intention mentalizing 0.05 [0.02, 0.07] 82.17 3.64 < .001

moderation by article topic

These analyses explore whether the analyses reported in study 1 of the main manuscript are moderated by article topic (health or climate).

H1

Are the relationships between ROI activity and self and social relevance ratings moderated by article topic?

self-referential ROI

There is a main effect of topic, such that health articles elicited greater activity in the self-referential ROI compared to climate articles.

These data are not consistent with moderation by topic.

# H1a moderation: add the ROI x topic interaction; random intercepts only
# (random slopes dropped here), fit to the Neurosynth-ROI wide data.
# NOTE: overwrites the mod_h1a object from the sensitivity analyses above.
mod_h1a =  lmer(self_relevance ~ self_referential * topic + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for the topic-moderated H1a model.
table_h1a = table_model(mod_h1a)

table_h1a %>%
    kable()  %>%
    kableExtra::kable_styling()
term b [95% CI] df t p
intercept 2.49 [2.41, 2.58] 100.94 61.33 < .001
self-referential 0.04 [0.00, 0.07] 5942.41 2.13 .033
topic (health) 0.14 [0.09, 0.19] 5861.35 5.48 < .001
self-referential x topic (health) 0.02 [-0.02, 0.06] 5876.17 0.90 .369

summary

# Full lmer output for the topic-moderated H1a model.
summary(mod_h1a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ self_referential * topic + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16523
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5549 -0.6985  0.1422  0.6774  2.3790 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1131   0.3364  
##  Residual             0.9094   0.9536  
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##                                Estimate Std. Error         df t value
## (Intercept)                     2.49479    0.04068  100.94100  61.331
## self_referential                0.03571    0.01679 5942.41379   2.127
## topichealth                     0.13659    0.02494 5861.34640   5.477
## self_referential:topichealth    0.02027    0.02254 5876.17326   0.899
##                                          Pr(>|t|)    
## (Intercept)                  < 0.0000000000000002 ***
## self_referential                           0.0334 *  
## topichealth                          0.0000000451 ***
## self_referential:topichealth               0.3686    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rf tpchlt
## self_rfrntl -0.029              
## topichealth -0.303  0.037       
## slf_rfrntl:  0.020 -0.679 -0.115

mentalizing ROI

There is a main effect of topic, such that health articles elicited greater activity in the mentalizing ROI compared to climate articles.

These data are not consistent with moderation by topic.

# H1b moderation: mentalizing x topic interaction, keeping the random
# mentalizing slope by participant. Overwrites the earlier mod_h1b object.
mod_h1b = lmer(social_relevance ~ mentalizing * topic + (1 + mentalizing | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for the topic-moderated H1b model.
table_h1b = table_model(mod_h1b)

table_h1b %>%
    kable()  %>%
    kableExtra::kable_styling()
term b [95% CI] df t p
intercept 2.51 [2.43, 2.60] 96.40 57.68 < .001
mentalizing 0.04 [0.01, 0.07] 240.27 2.59 .010
topic (health) 0.29 [0.24, 0.33] 5856.99 12.04 < .001
mentalizing x topic (health) 0.00 [-0.04, 0.04] 5822.76 0.12 .907

summary

# Full lmer output for the topic-moderated H1b model.
summary(mod_h1b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ mentalizing * topic + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15476.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7824 -0.7039  0.1247  0.6733  2.7778 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.136132 0.36896       
##           mentalizing 0.001238 0.03518  -0.12
##  Residual             0.758015 0.87064       
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##                            Estimate  Std. Error          df t value
## (Intercept)                2.514679    0.043594   96.403469  57.684
## mentalizing                0.040792    0.015737  240.274999   2.592
## topichealth                0.286625    0.023810 5856.988678  12.038
## mentalizing:topichealth    0.002438    0.020808 5822.762040   0.117
##                                    Pr(>|t|)    
## (Intercept)             <0.0000000000000002 ***
## mentalizing                          0.0101 *  
## topichealth             <0.0000000000000002 ***
## mentalizing:topichealth              0.9067    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) mntlzn tpchlt
## mentalizing -0.131              
## topichealth -0.265  0.168       
## mntlzng:tpc  0.072 -0.652 -0.311

combined plot

# Predictions by topic for both ROIs; ggpredict's second term ("topic")
# populates the `group` column used for color below.
predicted = ggeffects::ggpredict(mod_h1a, c("self_referential [-4.5:5]", "topic")) %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h1b, c("mentalizing [-4.5:5]", "topic")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing",
                     variable = "social relevance"))

# Trial-level data; topic is renamed to `group` so the raw data share the
# color aesthetic with the ggpredict output.
ind_data = merged_wide %>%
  select(topic, pID, trial, contains("relevance"), mentalizing, self_referential) %>%
  rename("self-referential" = self_referential,
         "group" = topic) %>%
  gather(variable, predicted, contains("relevance")) %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable)) %>%
  gather(roi, x, mentalizing, `self-referential`) %>%
  filter(!(variable == "self-relevance" & roi == "mentalizing") & ! (variable == "social relevance" & roi == "self-referential"))

# Faint per-participant-by-topic OLS lines, the model predictions, and 95% CI
# ribbons, colored by topic and faceted by rating type.
(plot_h1 = predicted %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  stat_smooth(data = ind_data, aes(group = interaction(pID, group)), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .3, color = NA) +
  geom_line(size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_topic) +
  scale_fill_manual(name = "", values = palette_topic) +
  labs(x = "\nROI activity (SD)", y = "predicted rating\n") +
  plot_aes  +
  theme(legend.position = "top",
        legend.key.width=unit(2,"cm")))

H2

Are the effects of the experimental manipulations on relevance moderated by article topic?

self-relevance

There is a main effect of topic such that health articles are rated as more self-relevant than climate articles.

There was also an interaction such that the effect of the self-focused condition on self-relevance was weaker for health articles.

# H2a: self-relevance as a function of condition, topic, and their
# interaction, with a random intercept per participant (bobyqa optimizer
# aids convergence).
mod_h2a = lmer(
  self_relevance ~ cond * topic + (1 | pID),
  control = lmerControl(optimizer = "bobyqa"),
  data = merged_wide
)

model table

# Tidy fixed-effects table for the H2a model
table_h2a = table_model(mod_h2a)

# Render as a styled HTML table
kableExtra::kable_styling(kable(table_h2a))
term b [95% CI] df t p
intercept 2.44 [2.35, 2.53] 188.84 51.39 < .001
other 0.05 [-0.04, 0.13] 5858.26 1.08 .280
self 0.12 [0.04, 0.21] 5858.32 2.88 .004
topic (health) 0.23 [0.15, 0.32] 5858.26 5.41 < .001
other x topic (health) -0.08 [-0.20, 0.04] 5858.32 -1.28 .201
self x topic (health) -0.18 [-0.30, -0.07] 5858.34 -3.05 .002

summary

summary(mod_h2a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ cond * topic + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16532.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5118 -0.6974  0.1450  0.6778  2.4324 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1125   0.3354  
##  Residual             0.9105   0.9542  
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##                         Estimate Std. Error         df t value
## (Intercept)              2.44081    0.04749  188.83870  51.395
## condother                0.04634    0.04287 5858.26358   1.081
## condself                 0.12347    0.04285 5858.31564   2.882
## topichealth              0.23185    0.04286 5858.26268   5.410
## condother:topichealth   -0.07745    0.06062 5858.31558  -1.278
## condself:topichealth    -0.18495    0.06062 5858.34135  -3.051
##                                   Pr(>|t|)    
## (Intercept)           < 0.0000000000000002 ***
## condother                          0.27980    
## condself                           0.00397 ** 
## topichealth                   0.0000000657 ***
## condother:topichealth              0.20146    
## condself:topichealth               0.00229 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.450                            
## condself    -0.450  0.499                     
## topichealth -0.450  0.499  0.499              
## cndthr:tpch  0.318 -0.707 -0.353 -0.707       
## cndslf:tpch  0.318 -0.353 -0.707 -0.707  0.500

social relevance

There is a main effect of topic such that health articles are rated as more socially relevant than climate articles.

These data are not consistent with moderation by topic.

# H2b: social relevance as a function of condition, topic, and their
# interaction, with a random intercept per participant (bobyqa optimizer
# aids convergence).
mod_h2b = lmer(
  social_relevance ~ cond * topic + (1 | pID),
  control = lmerControl(optimizer = "bobyqa"),
  data = merged_wide
)

model table

# Tidy fixed-effects table for the H2b model
table_h2b = table_model(mod_h2b)

# Render as a styled HTML table
kableExtra::kable_styling(kable(table_h2b))
term b [95% CI] df t p
intercept 2.49 [2.39, 2.59] 155.28 51.21 < .001
other 0.03 [-0.04, 0.11] 5858.22 0.88 .379
self 0.08 [0.00, 0.15] 5858.26 1.99 .047
topic (health) 0.31 [0.23, 0.38] 5858.22 7.79 < .001
other x topic (health) 0.03 [-0.08, 0.13] 5858.26 0.48 .634
self x topic (health) -0.07 [-0.17, 0.04] 5858.28 -1.19 .232

summary

summary(mod_h2b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ cond * topic + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15490.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8004 -0.7073  0.1239  0.6693  2.7663 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1344   0.3666  
##  Residual             0.7605   0.8721  
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##                         Estimate Std. Error         df t value
## (Intercept)              2.49009    0.04863  155.28047  51.205
## condother                0.03450    0.03918 5858.22017   0.880
## condself                 0.07777    0.03916 5858.25823   1.986
## topichealth              0.30511    0.03917 5858.21924   7.789
## condother:topichealth    0.02637    0.05540 5858.25766   0.476
## condself:topichealth    -0.06617    0.05540 5858.27669  -1.194
##                                   Pr(>|t|)    
## (Intercept)           < 0.0000000000000002 ***
## condother                           0.3787    
## condself                            0.0471 *  
## topichealth            0.00000000000000792 ***
## condother:topichealth               0.6342    
## condself:topichealth                0.2324    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.402                            
## condself    -0.402  0.499                     
## topichealth -0.402  0.499  0.499              
## cndthr:tpch  0.284 -0.707 -0.353 -0.707       
## cndslf:tpch  0.284 -0.353 -0.707 -0.707  0.500

combined plot

# Model-predicted ratings by condition and topic for the H2a (self-relevance)
# and H2b (social relevance) models, stacked for a faceted plot. Conditions
# are releveled so the x-axis reads self / control / other.
predicted_h2 = ggeffects::ggpredict(mod_h2a, c("cond", "topic")) %>%
  data.frame() %>%
  mutate(model = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h2b, c("cond", "topic")) %>%
              data.frame() %>%
              mutate(model = "social relevance")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# Participant-level data reshaped to match predicted_h2's column names
# (x = condition, group = topic, model = rating type, predicted = rating),
# used for the thin per-participant lines in the combined plot.
ind_data_h2 = merged_wide %>%
  rename("x" = cond,
         "group" = topic) %>%
  gather(model, predicted, self_relevance, social_relevance) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         model = gsub("self_relevance", "self-relevance", model),
         model = gsub("social_relevance", "social relevance", model))
  
# Combined H2 plot: per-participant mean trajectories across conditions
# (thin lines), topic-level mean lines, and model-predicted point ranges
# with 95% CIs, faceted by rating type. Outer parentheses print the plot
# while also assigning it.
(plot_h2 = predicted_h2 %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  stat_summary(data = ind_data_h2, aes(group = interaction(pID, group)), fun = "mean", geom = "line", size = .1) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  facet_grid(~model) +
  scale_color_manual(name = "", values = palette_topic) +
  labs(x = "", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H3

Are the relationships between self and social relevance and sharing intentions moderated by article topic?

The relationship between self-relevance and sharing intentions was not moderated by topic.

However, the relationship between social relevance and sharing intentions was slightly stronger for health articles compared to climate articles.

# H3: sharing intentions (value) predicted by self- and social relevance,
# each interacting with topic; random intercepts and random slopes for both
# relevance ratings per participant (bobyqa optimizer aids convergence).
mod_h3 = lmer(
  value ~ self_relevance * topic + social_relevance * topic + (1 + self_relevance + social_relevance | pID),
  control = lmerControl(optimizer = "bobyqa"),
  data = merged_wide
)

plot

# Model-predicted sharing intentions across self-relevance and social
# relevance (both from the single H3 model), by topic, stacked for a
# faceted plot.
predicted = ggeffects::ggpredict(mod_h3, c("self_relevance", "topic")) %>%
  data.frame() %>%
  mutate(variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h3, c("social_relevance", "topic")) %>%
              data.frame() %>%
              mutate(variable = "social relevance"))

# Participant-level data reshaped to match `predicted` (x = relevance rating,
# predicted = sharing intention, group = topic), for per-participant lines.
points = merged_wide %>%
  rename("self-referential" = self_referential,
         "predicted" = value,
         "group" = topic) %>%
  gather(variable, x, contains("relevance")) %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable))

# H3 plot: thin per-participant OLS lines underneath the model-predicted
# lines with 95% CI ribbons, faceted by relevance type. Outer parentheses
# print the plot while also assigning it.
(plot_rel_sharing = predicted %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  stat_smooth(data = points, aes(group = interaction(pID, group)), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_topic) +
  scale_fill_manual(name = "", values = palette_topic) +
  labs(x = "\nrating", y = "predicted sharing intention\n") +
  plot_aes)

model table

# Tidy fixed-effects table for the H3 model
table_h3 = table_model(mod_h3)

# Render as a styled HTML table
kableExtra::kable_styling(kable(table_h3))
term b [95% CI] df t p
intercept 1.27 [1.12, 1.41] 115.31 17.61 < .001
self-relevance 0.29 [0.24, 0.34] 273.64 10.70 < .001
topic (health) -0.12 [-0.26, 0.02] 5789.39 -1.65 .099
social relevance 0.20 [0.14, 0.27] 175.02 6.40 < .001
self-relevance x topic (health) 0.03 [-0.03, 0.09] 5522.96 0.97 .331
topic (health) x social relevance 0.07 [0.01, 0.13] 5568.67 2.13 .034

summary

summary(mod_h3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_relevance * topic + social_relevance * topic + (1 +  
##     self_relevance + social_relevance | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 14721.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2088 -0.6956  0.0526  0.6876  3.0547 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr       
##  pID      (Intercept)      0.22804  0.4775              
##           self_relevance   0.01222  0.1106   -0.25      
##           social_relevance 0.03279  0.1811   -0.59 -0.55
##  Residual                  0.68314  0.8265              
## Number of obs: 5868, groups:  pID, 84
## 
## Fixed effects:
##                                Estimate Std. Error         df t value
## (Intercept)                     1.26574    0.07188  115.30995  17.610
## self_relevance                  0.28904    0.02702  273.63891  10.696
## topichealth                    -0.11697    0.07089 5789.39410  -1.650
## social_relevance                0.20490    0.03200  175.02426   6.402
## self_relevance:topichealth      0.02974    0.03057 5522.95924   0.973
## topichealth:social_relevance    0.06853    0.03223 5568.67330   2.126
##                                          Pr(>|t|)    
## (Intercept)                  < 0.0000000000000002 ***
## self_relevance               < 0.0000000000000002 ***
## topichealth                                0.0990 .  
## social_relevance                    0.00000000136 ***
## self_relevance:topichealth                 0.3308    
## topichealth:social_relevance               0.0336 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rl tpchlt scl_rl slf_r:
## self_relvnc -0.259                            
## topichealth -0.439  0.184                     
## socil_rlvnc -0.489 -0.657  0.193              
## slf_rlvnc:t  0.154 -0.680 -0.295  0.428       
## tpchlth:sc_  0.197  0.476 -0.467 -0.569 -0.674

H4

Are the effects of the experimental manipulations on ROI activity moderated by article topic?

self-referential ROI

There is a main effect of topic, such that health articles elicited greater activity in the self-referential ROI compared to climate articles.

These data are not consistent with moderation by topic.

# H4a: self-referential ROI activity as a function of condition, topic, and
# their interaction; random intercepts and random condition slopes per
# participant (bobyqa optimizer aids convergence).
mod_h4a = lmer(
  self_referential ~ cond * topic + (1 + cond | pID),
  control = lmerControl(optimizer = "bobyqa"),
  data = merged_wide
)

model table

# Tidy fixed-effects table for the H4a model
table_h4a = table_model(mod_h4a)

# Render as a styled HTML table
kableExtra::kable_styling(kable(table_h4a))
term b [95% CI] df t p
intercept 0.00 [-0.12, 0.12] 110.74 0.00 .999
other 0.08 [-0.02, 0.18] 227.91 1.62 .106
self 0.13 [0.03, 0.24] 192.77 2.50 .013
topic (health) 0.14 [0.05, 0.23] 5695.08 3.11 .002
other x topic (health) -0.00 [-0.13, 0.12] 5695.34 -0.05 .958
self x topic (health) -0.07 [-0.19, 0.05] 5694.90 -1.10 .271

summary

summary(mod_h4a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_referential ~ cond * topic + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17082
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8283 -0.6554  0.0018  0.6446  3.6336 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.22466  0.4740              
##           condother   0.04152  0.2038   -0.28      
##           condself    0.07313  0.2704   -0.03  0.72
##  Residual             0.97872  0.9893              
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##                            Estimate    Std. Error            df t value
## (Intercept)              0.00009311    0.06049451  110.73867070   0.002
## condother                0.08075608    0.04970368  227.90563489   1.625
## condself                 0.13331889    0.05333976  192.76972041   2.499
## topichealth              0.13807969    0.04444004 5695.07696652   3.107
## condother:topichealth   -0.00332650    0.06285522 5695.33804033  -0.053
## condself:topichealth    -0.06917806    0.06285687 5694.89665189  -1.101
##                       Pr(>|t|)   
## (Intercept)             0.9988   
## condother               0.1056   
## condself                0.0133 * 
## topichealth             0.0019 **
## condother:topichealth   0.9578   
## condself:topichealth    0.2711   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.433                            
## condself    -0.319  0.549                     
## topichealth -0.366  0.446  0.415              
## cndthr:tpch  0.259 -0.632 -0.294 -0.707       
## cndslf:tpch  0.259 -0.315 -0.589 -0.707  0.500

mentalizing ROI

There is a main effect of topic, such that health articles elicited greater activity in the mentalizing ROI compared to climate articles.

These data are not consistent with moderation by topic.

# H4b: mentalizing ROI activity as a function of condition, topic, and their
# interaction; random intercepts and random condition slopes per participant
# (bobyqa optimizer aids convergence).
mod_h4b = lmer(
  mentalizing ~ cond * topic + (1 + cond | pID),
  control = lmerControl(optimizer = "bobyqa"),
  data = merged_wide
)

model table

# Tidy fixed-effects table for the H4b model
table_h4b = table_model(mod_h4b)

# Render as a styled HTML table
kableExtra::kable_styling(kable(table_h4b))
term b [95% CI] df t p
intercept 0.25 [0.14, 0.36] 114.44 4.34 < .001
other 0.05 [-0.04, 0.15] 232.31 1.06 .290
self 0.11 [0.00, 0.21] 197.42 2.06 .041
topic (health) 0.11 [0.03, 0.20] 5695.22 2.55 .011
other x topic (health) 0.01 [-0.11, 0.13] 5695.48 0.16 .872
self x topic (health) -0.05 [-0.17, 0.07] 5694.97 -0.77 .439

summary

summary(mod_h4b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: mentalizing ~ cond * topic + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17081.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6748 -0.6617  0.0224  0.6648  3.2954 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.19581  0.4425              
##           condother   0.03847  0.1961   -0.29      
##           condself    0.06814  0.2610    0.03  0.70
##  Residual             0.98110  0.9905              
## Number of obs: 5947, groups:  pID, 84
## 
## Fixed effects:
##                         Estimate Std. Error         df t value  Pr(>|t|)    
## (Intercept)              0.25030    0.05761  114.44222   4.345 0.0000303 ***
## condother                0.05242    0.04939  232.30976   1.061    0.2896    
## condself                 0.10885    0.05282  197.42357   2.061    0.0407 *  
## topichealth              0.11358    0.04449 5695.21693   2.553    0.0107 *  
## condother:topichealth    0.01011    0.06293 5695.48075   0.161    0.8723    
## condself:topichealth    -0.04872    0.06293 5694.96938  -0.774    0.4388    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.454                            
## condself    -0.311  0.543                     
## topichealth -0.385  0.449  0.420              
## cndthr:tpch  0.272 -0.637 -0.297 -0.707       
## cndslf:tpch  0.272 -0.318 -0.595 -0.707  0.500

combined plot

# Model-predicted ROI activity by condition and topic for the H4a
# (self-referential) and H4b (mentalizing) models, stacked for a faceted
# plot; condition and ROI factor levels set for display order.
predicted_h4 = ggeffects::ggpredict(mod_h4a, c("cond", "topic")) %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h4b, c("cond", "topic")) %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Participant-level ROI data (from the long-format `merged` data frame)
# restricted to the two ROIs and renamed to match predicted_h4's columns.
# NOTE(review): parameter_estimate_std appears to be standardized ROI
# activity per trial — confirm against the data-prep section.
ind_data_h4 = merged %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(topic, pID, cond, run, trial, atlas, parameter_estimate_std) %>%
  unique() %>%
  rename("x" = cond,
         "predicted" = parameter_estimate_std,
         "group" = topic) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Combined H4 plot: per-participant mean trajectories (thin lines),
# topic-level means, and model-predicted point ranges with 95% CIs,
# faceted by ROI. Outer parentheses print the plot while also assigning it.
(plot_h4 = predicted_h4 %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  stat_summary(data = ind_data_h4, aes(group = interaction(pID, group)), fun = "mean", geom = "line", size = .1) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_topic) +
  labs(x = "", y = "ROI activity (SD)\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H5

Are the effects of the experimental manipulations on sharing intentions moderated by article topic?

There is a main effect of topic, such that health articles have higher sharing intentions than climate articles.

These data are not consistent with moderation by topic.

# H5: sharing intentions (value) as a function of condition, topic, and
# their interaction, with a random intercept per participant (bobyqa
# optimizer aids convergence).
mod_h5 = lmer(
  value ~ cond * topic + (1 | pID),
  control = lmerControl(optimizer = "bobyqa"),
  data = merged_wide
)

plot

# Model-predicted sharing intentions by condition and topic, with condition
# levels ordered for display.
predicted_h5 = ggeffects::ggpredict(mod_h5, c("cond", "topic")) %>%
  data.frame() %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# Participant-level data renamed to match predicted_h5's columns
# (x = condition, predicted = sharing intention, group = topic).
ind_data_h5 = merged_wide %>%
  rename("x" = cond,
         "predicted" = value,
         "group" = topic) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))
  
# H5 plot: per-participant mean trajectories (thin lines), topic-level
# means, and model-predicted point ranges with 95% CIs.
predicted_h5 %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  stat_summary(data = ind_data_h5, aes(group = interaction(pID, group)), fun = "mean", geom = "line", size = .1) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  scale_color_manual(name = "", values = palette_topic) +
  labs(x = "", y = "predicted sharing intention\n") +
  plot_aes +
  theme(legend.position = c(.85, .15))

model table

# Tidy fixed-effects table for the H5 model
table_h5 = table_model(mod_h5)

# Render as a styled HTML table
kableExtra::kable_styling(kable(table_h5))
term b [95% CI] df t p
intercept 2.52 [2.43, 2.62] 197.78 53.52 < .001
other -0.05 [-0.13, 0.04] 5779.57 -1.13 .257
self -0.04 [-0.12, 0.05] 5779.56 -0.86 .388
topic (health) 0.24 [0.16, 0.33] 5779.51 5.53 < .001
other x topic (health) 0.03 [-0.09, 0.15] 5779.61 0.56 .575
self x topic (health) -0.02 [-0.14, 0.10] 5779.62 -0.28 .780

summary

summary(mod_h5)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ cond * topic + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16398.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5077 -0.7365  0.1038  0.7469  2.1729 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1077   0.3281  
##  Residual             0.9242   0.9614  
## Number of obs: 5868, groups:  pID, 84
## 
## Fixed effects:
##                         Estimate Std. Error         df t value
## (Intercept)              2.52468    0.04717  197.78012  53.519
## condother               -0.04927    0.04350 5779.56613  -1.132
## condself                -0.03760    0.04351 5779.56259  -0.864
## topichealth              0.24045    0.04346 5779.50506   5.533
## condother:topichealth    0.03448    0.06148 5779.61159   0.561
## condself:topichealth    -0.01714    0.06149 5779.62394  -0.279
##                                   Pr(>|t|)    
## (Intercept)           < 0.0000000000000002 ***
## condother                            0.257    
## condself                             0.388    
## topichealth                   0.0000000329 ***
## condother:topichealth                0.575    
## condself:topichealth                 0.780    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.460                            
## condself    -0.459  0.498                     
## topichealth -0.460  0.499  0.499              
## cndthr:tpch  0.325 -0.708 -0.353 -0.707       
## cndslf:tpch  0.325 -0.353 -0.708 -0.707  0.500

H6

Are the positive relationships between ROI activity and sharing intentions moderated by article topic?

self-referential ROI

There is a main effect of topic, such that health articles have higher sharing intentions than climate articles.

These data are not consistent with moderation by topic.

# H6a: sharing intentions (value) predicted by self-referential ROI activity,
# topic, and their interaction; random intercepts and random ROI slopes per
# participant (bobyqa optimizer aids convergence).
mod_h6a = lmer(
  value ~ self_referential * topic + (1 + self_referential | pID),
  control = lmerControl(optimizer = "bobyqa"),
  data = merged_wide
)

model table

# Tidy fixed-effects table for the H6a model
table_h6a = table_model(mod_h6a)

# Render as a styled HTML table
kableExtra::kable_styling(kable(table_h6a))
term b [95% CI] df t p
intercept 2.49 [2.41, 2.57] 101.94 61.80 < .001
self-referential 0.08 [0.04, 0.11] 242.14 4.38 < .001
topic (health) 0.24 [0.19, 0.29] 5779.56 9.40 < .001
self-referential x topic (health) 0.00 [-0.04, 0.05] 5761.40 0.05 .963

summary

summary(mod_h6a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_referential * topic + (1 + self_referential | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16355.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5103 -0.7321  0.1018  0.7524  2.1599 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.109417 0.33078       
##           self_referential 0.001753 0.04186  -0.30
##  Residual                  0.916362 0.95727       
## Number of obs: 5868, groups:  pID, 84
## 
## Fixed effects:
##                                 Estimate  Std. Error          df t value
## (Intercept)                     2.488948    0.040277  101.942906  61.796
## self_referential                0.077337    0.017653  242.135232   4.381
## topichealth                     0.237322    0.025237 5779.560650   9.404
## self_referential:topichealth    0.001047    0.022848 5761.399945   0.046
##                                          Pr(>|t|)    
## (Intercept)                  < 0.0000000000000002 ***
## self_referential                        0.0000176 ***
## topichealth                  < 0.0000000000000002 ***
## self_referential:topichealth                0.963    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rf tpchlt
## self_rfrntl -0.103              
## topichealth -0.310  0.039       
## slf_rfrntl:  0.023 -0.659 -0.118

mentalizing ROI

There is a main effect of topic, such that health articles have higher sharing intentions than climate articles.

These data are not consistent with moderation by topic.

# H6b: sharing intentions (value) predicted by mentalizing ROI activity,
# topic, and their interaction; random intercepts and random ROI slopes per
# participant (bobyqa optimizer aids convergence).
mod_h6b = lmer(
  value ~ mentalizing * topic + (1 + mentalizing | pID),
  control = lmerControl(optimizer = "bobyqa"),
  data = merged_wide
)

model table

# Tidy fixed-effects table for the H6b model
table_h6b = table_model(mod_h6b)

# Render as a styled HTML table
kableExtra::kable_styling(kable(table_h6b))
term b [95% CI] df t p
intercept 2.47 [2.39, 2.55] 103.91 60.85 < .001
mentalizing 0.08 [0.04, 0.11] 245.83 4.55 < .001
topic (health) 0.25 [0.19, 0.30] 5774.79 9.31 < .001
mentalizing x topic (health) -0.02 [-0.06, 0.03] 5754.14 -0.82 .415

summary

summary(mod_h6b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ mentalizing * topic + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16364.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4775 -0.7326  0.1065  0.7613  2.1307 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.109564 0.33100       
##           mentalizing 0.001079 0.03285  -0.15
##  Residual             0.918209 0.95823       
## Number of obs: 5868, groups:  pID, 84
## 
## Fixed effects:
##                           Estimate Std. Error         df t value
## (Intercept)                2.47112    0.04061  103.91197  60.851
## mentalizing                0.07880    0.01732  245.82977   4.550
## topichealth                0.24584    0.02641 5774.79468   9.308
## mentalizing:topichealth   -0.01881    0.02307 5754.13787  -0.816
##                                     Pr(>|t|)    
## (Intercept)             < 0.0000000000000002 ***
## mentalizing                       0.00000843 ***
## topichealth             < 0.0000000000000002 ***
## mentalizing:topichealth                0.415    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) mntlzn tpchlt
## mentalizing -0.156              
## topichealth -0.316  0.175       
## mntlzng:tpc  0.088 -0.661 -0.315

combined plot

# Grid of ROI activity values at which to generate predictions; ggeffects
# resolves "[vals]" in the term string to this vector from the calling
# environment.
vals = seq(-4.5,4.5,.1)

# Model-predicted sharing intentions across ROI activity, by topic, for the
# H6a (self-referential) and H6b (mentalizing) models, stacked and ordered
# for a faceted plot.
predicted_h6 = ggeffects::ggpredict(mod_h6a, c("self_referential [vals]", "topic")) %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h6b, c("mentalizing [vals]", "topic")) %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Participant-level ROI and sharing data (from the long-format `merged` data
# frame) restricted to the two ROIs and renamed to match predicted_h6's
# columns (x = ROI activity, predicted = sharing intention, group = topic).
ind_data_h6 = merged %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(topic, pID, cond, run, trial, atlas, parameter_estimate_std, value) %>%
  rename("x" = parameter_estimate_std,
         "predicted" = value,
         "group" = topic) %>%
  mutate(atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# H6 plot: thin per-participant OLS lines underneath the model-predicted
# lines with 95% CI ribbons, faceted by ROI.
predicted_h6 %>%
  ggplot(aes(x = x, y = predicted, color = group, fill = group)) +
  stat_smooth(data = ind_data_h6, aes(group = interaction(pID, group)), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_topic) +
  scale_fill_manual(name = "", values = palette_topic) +
  labs(y = "predicted sharing intention\n", x = "\nROI activity (SD)") +
  plot_aes +
  theme(legend.position = "top")

combined table

# Combined results table: stack every hypothesis-level model table, label
# each with its dependent variable, and render as one styled HTML table.
# (table_h1a and table_h1b are defined earlier in the document.)
table_h1a %>% mutate(DV = "H1a: Self-relevance") %>%
  bind_rows(table_h1b %>% mutate(DV = "H1b: Social relevance")) %>%
  bind_rows(table_h2a %>% mutate(DV = "H2a: Self-relevance")) %>%
  bind_rows(table_h2b %>% mutate(DV = "H2b: Social relevance")) %>%
  bind_rows(table_h3 %>% mutate(DV = "H3a-b: Sharing intention")) %>%
  bind_rows(table_h4a %>% mutate(DV = "H4a: Self-referential ROI")) %>%
  bind_rows(table_h4b %>% mutate(DV = "H4b: Mentalizing ROI")) %>%
  bind_rows(table_h5 %>% mutate(DV = "H5: Sharing intention")) %>%
  bind_rows(table_h6a %>% mutate(DV = "H6a: Sharing intention")) %>%
  bind_rows(table_h6b %>% mutate(DV = "H6b: Sharing intention")) %>%
  select(DV, everything()) %>%
  kable() %>%
  kable_styling()
DV term b [95% CI] df t p
H1a: Self-relevance intercept 2.49 [2.41, 2.58] 100.94 61.33 < .001
H1a: Self-relevance self-referential 0.04 [0.00, 0.07] 5942.41 2.13 .033
H1a: Self-relevance topic (health) 0.14 [0.09, 0.19] 5861.35 5.48 < .001
H1a: Self-relevance self-referential x topic (health) 0.02 [-0.02, 0.06] 5876.17 0.90 .369
H1b: Social relevance intercept 2.51 [2.43, 2.60] 96.40 57.68 < .001
H1b: Social relevance mentalizing 0.04 [0.01, 0.07] 240.27 2.59 .010
H1b: Social relevance topic (health) 0.29 [0.24, 0.33] 5856.99 12.04 < .001
H1b: Social relevance mentalizing x topic (health) 0.00 [-0.04, 0.04] 5822.76 0.12 .907
H2a: Self-relevance intercept 2.44 [2.35, 2.53] 188.84 51.39 < .001
H2a: Self-relevance other 0.05 [-0.04, 0.13] 5858.26 1.08 .280
H2a: Self-relevance self 0.12 [0.04, 0.21] 5858.32 2.88 .004
H2a: Self-relevance topic (health) 0.23 [0.15, 0.32] 5858.26 5.41 < .001
H2a: Self-relevance other x topic (health) -0.08 [-0.20, 0.04] 5858.32 -1.28 .201
H2a: Self-relevance self x topic (health) -0.18 [-0.30, -0.07] 5858.34 -3.05 .002
H2b: Social relevance intercept 2.49 [2.39, 2.59] 155.28 51.21 < .001
H2b: Social relevance other 0.03 [-0.04, 0.11] 5858.22 0.88 .379
H2b: Social relevance self 0.08 [0.00, 0.15] 5858.26 1.99 .047
H2b: Social relevance topic (health) 0.31 [0.23, 0.38] 5858.22 7.79 < .001
H2b: Social relevance other x topic (health) 0.03 [-0.08, 0.13] 5858.26 0.48 .634
H2b: Social relevance self x topic (health) -0.07 [-0.17, 0.04] 5858.28 -1.19 .232
H3a-b: Sharing intention intercept 1.27 [1.12, 1.41] 115.31 17.61 < .001
H3a-b: Sharing intention self-relevance 0.29 [0.24, 0.34] 273.64 10.70 < .001
H3a-b: Sharing intention topic (health) -0.12 [-0.26, 0.02] 5789.39 -1.65 .099
H3a-b: Sharing intention social relevance 0.20 [0.14, 0.27] 175.02 6.40 < .001
H3a-b: Sharing intention self-relevance x topic (health) 0.03 [-0.03, 0.09] 5522.96 0.97 .331
H3a-b: Sharing intention topic (health) x social relevance 0.07 [0.01, 0.13] 5568.67 2.13 .034
H4a: Self-referential ROI intercept 0.00 [-0.12, 0.12] 110.74 0.00 .999
H4a: Self-referential ROI other 0.08 [-0.02, 0.18] 227.91 1.62 .106
H4a: Self-referential ROI self 0.13 [0.03, 0.24] 192.77 2.50 .013
H4a: Self-referential ROI topic (health) 0.14 [0.05, 0.23] 5695.08 3.11 .002
H4a: Self-referential ROI other x topic (health) -0.00 [-0.13, 0.12] 5695.34 -0.05 .958
H4a: Self-referential ROI self x topic (health) -0.07 [-0.19, 0.05] 5694.90 -1.10 .271
H4b: Mentalizing ROI intercept 0.25 [0.14, 0.36] 114.44 4.34 < .001
H4b: Mentalizing ROI other 0.05 [-0.04, 0.15] 232.31 1.06 .290
H4b: Mentalizing ROI self 0.11 [0.00, 0.21] 197.42 2.06 .041
H4b: Mentalizing ROI topic (health) 0.11 [0.03, 0.20] 5695.22 2.55 .011
H4b: Mentalizing ROI other x topic (health) 0.01 [-0.11, 0.13] 5695.48 0.16 .872
H4b: Mentalizing ROI self x topic (health) -0.05 [-0.17, 0.07] 5694.97 -0.77 .439
H5: Sharing intention intercept 2.52 [2.43, 2.62] 197.78 53.52 < .001
H5: Sharing intention other -0.05 [-0.13, 0.04] 5779.57 -1.13 .257
H5: Sharing intention self -0.04 [-0.12, 0.05] 5779.56 -0.86 .388
H5: Sharing intention topic (health) 0.24 [0.16, 0.33] 5779.51 5.53 < .001
H5: Sharing intention other x topic (health) 0.03 [-0.09, 0.15] 5779.61 0.56 .575
H5: Sharing intention self x topic (health) -0.02 [-0.14, 0.10] 5779.62 -0.28 .780
H6a: Sharing intention intercept 2.49 [2.41, 2.57] 101.94 61.80 < .001
H6a: Sharing intention self-referential 0.08 [0.04, 0.11] 242.14 4.38 < .001
H6a: Sharing intention topic (health) 0.24 [0.19, 0.29] 5779.56 9.40 < .001
H6a: Sharing intention self-referential x topic (health) 0.00 [-0.04, 0.05] 5761.40 0.05 .963
H6b: Sharing intention intercept 2.47 [2.39, 2.55] 103.91 60.85 < .001
H6b: Sharing intention mentalizing 0.08 [0.04, 0.11] 245.83 4.55 < .001
H6b: Sharing intention topic (health) 0.25 [0.19, 0.30] 5774.79 9.31 < .001
H6b: Sharing intention mentalizing x topic (health) -0.02 [-0.06, 0.03] 5754.14 -0.82 .415

cite packages

# Generate formatted citations for every package loaded in this session,
# so the reference list below stays in sync with the analysis environment.
report::cite_packages()
##   - Angelo Canty and Brian Ripley (2021). boot: Bootstrap R (S-Plus) Functions. R package version 1.3-28.
##   - Douglas Bates, Martin Maechler and Mikael Jagan (2023). Matrix: Sparse and Dense Matrix Classes and Methods. R package version 1.5-4. https://CRAN.R-project.org/package=Matrix
##   - Douglas Bates, Martin Maechler, Ben Bolker, Steve Walker (2015). Fitting Linear Mixed-Effects Models Using lme4. Journal of Statistical Software, 67(1), 1-48. doi:10.18637/jss.v067.i01.
##   - H. Wickham. ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York, 2016.
##   - Hadley Wickham (2019). stringr: Simple, Consistent Wrappers for Common String Operations. R package version 1.4.0. https://CRAN.R-project.org/package=stringr
##   - Hadley Wickham (2021). forcats: Tools for Working with Categorical Variables (Factors). R package version 0.5.1. https://CRAN.R-project.org/package=forcats
##   - Hadley Wickham and Maximilian Girlich (2022). tidyr: Tidy Messy Data. R package version 1.2.0. https://CRAN.R-project.org/package=tidyr
##   - Hadley Wickham, Jennifer Bryan and Malcolm Barrett (2021). usethis: Automate Package and Project Setup. R package version 2.1.5. https://CRAN.R-project.org/package=usethis
##   - Hadley Wickham, Jim Hester and Jennifer Bryan (2022). readr: Read Rectangular Text Data. R package version 2.1.2. https://CRAN.R-project.org/package=readr
##   - Hadley Wickham, Jim Hester, Winston Chang and Jennifer Bryan (2021). devtools: Tools to Make Developing R Packages Easier. R package version 2.4.3. https://CRAN.R-project.org/package=devtools
##   - Hadley Wickham, Romain François, Lionel Henry and Kirill Müller (2022). dplyr: A Grammar of Data Manipulation. R package version 1.0.9. https://CRAN.R-project.org/package=dplyr
##   - Hao Zhu (2021). kableExtra: Construct Complex Table with 'kable' and Pipe Syntax. R package version 1.3.4. https://CRAN.R-project.org/package=kableExtra
##   - Jim Hester, Hadley Wickham and Gábor Csárdi (2021). fs: Cross-Platform File System Operations Based on 'libuv'. R package version 1.5.2. https://CRAN.R-project.org/package=fs
##   - Kirill Müller and Hadley Wickham (2022). tibble: Simple Data Frames. R package version 3.1.8. https://CRAN.R-project.org/package=tibble
##   - Kuznetsova A, Brockhoff PB, Christensen RHB (2017). "lmerTest Package: Tests in Linear Mixed Effects Models." _Journal of Statistical Software_, *82*(13), 1-26. doi: 10.18637/jss.v082.i13 (URL: https://doi.org/10.18637/jss.v082.i13).
##   - Lionel Henry and Hadley Wickham (2020). purrr: Functional Programming Tools. R package version 0.3.4. https://CRAN.R-project.org/package=purrr
##   - Lüdecke D (2018). "ggeffects: Tidy Data Frames of Marginal Effects from Regression Models." _Journal of Open Source Software_, *3*(26), 772. doi: 10.21105/joss.00772 (URL: https://doi.org/10.21105/joss.00772).
##   - R Core Team (2021). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.
##   - Rinker, T. W. & Kurkiewicz, D. (2017). pacman: Package Management for R. version 0.5.0. Buffalo, New York. http://github.com/trinker/pacman
##   - Wickham et al., (2019). Welcome to the tidyverse. Journal of Open Source Software, 4(43), 1686, https://doi.org/10.21105/joss.01686
##   - Yihui Xie (2021). knitr: A General-Purpose Package for Dynamic Report Generation in R. R package version 1.37.